type runtime.g
190 uses
runtime (current package)
cgo_sigaction.go#L45: var g *g
cgocall.go#L404: defer func(gp *g) {
chan.go#L716: func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
coro.go#L106: func coroswitch_m(gp *g) {
coro.go#L167: var gnext *g
debugcall.go#L163: mcall(func(gp *g) {
debugcall.go#L205: callingG *g
debugcall.go#L221: mcall(func(gp *g) {
heapdump.go#L340: func dumpgoroutine(gp *g) {
heapdump.go#L411: forEachG(func(gp *g) {
lock_futex.go#L252: func beforeIdle(int64, int64) (*g, bool) {
malloc.go#L1333: func deductAssistCredit(size uintptr) *g {
malloc.go#L1334: var assistG *g
mfinal.go#L44: var fing *g // goroutine that runs finalizers
mfinal.go#L153: func wakefing() *g {
mfinal.go#L167: func finalizercommit(gp *g, lock unsafe.Pointer) bool {
mgc.go#L373: stackRoots []*g
mgc.go#L1412: gopark(func(g *g, nodep unsafe.Pointer) bool {
mgc.go#L1731: forEachG(func(gp *g) {
mgcmark.go#L131: forEachGRace(func(gp *g) {
mgcmark.go#L415: func gcAssistAlloc(gp *g) {
mgcmark.go#L616: func gcAssistAlloc1(gp *g, scanWork int64) {
mgcmark.go#L819: func scanstack(gp *g, gcw *gcWork) int64 {
mgcpacer.go#L731: func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
mgcscavenge.go#L282: g *g
mgcsweep.go#L38: g *g
mprof.go#L1441: forEachGRace(func(gp1 *g) {
mprof.go#L1454: forEachGRace(func(gp1 *g) {
mprof.go#L1482: func tryRecordGoroutineProfileWB(gp1 *g) {
mprof.go#L1492: func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
mprof.go#L1541: func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
mprof.go#L1574: isOK := func(gp1 *g) bool {
mprof.go#L1585: forEachGRace(func(gp1 *g) {
mprof.go#L1610: forEachGRace(func(gp1 *g) {
mprof.go#L1664: func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
netpoll.go#L434: var rg, wg *g
netpoll.go#L460: var rg, wg *g
netpoll.go#L496: var rg, wg *g
netpoll.go#L529: func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
netpoll.go#L540: func netpollgoready(gp *g, traceskip int) {
netpoll.go#L591: func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
netpoll.go#L617: return (*g)(unsafe.Pointer(old))
netpoll.go#L636: var rg *g
netpoll.go#L645: var wg *g
panic.go#L552: func popDefer(gp *g) {
panic.go#L1115: func recovery(gp *g) {
panic.go#L1373: func dopanic_m(gp *g, pc, sp uintptr) bool {
panic.go#L1456: func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
preempt.go#L62: g *g
preempt.go#L105: func suspendG(gp *g) suspendGState {
preempt.go#L342: func wantAsyncPreempt(gp *g) bool {
preempt.go#L363: func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
proc.go#L118: g0 g
proc.go#L407: func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
proc.go#L443: func goready(gp *g, traceskip int) {
proc.go#L538: func badmcall(fn func(*g)) {
proc.go#L542: func badmcall2(fn func(*g)) {
proc.go#L582: var gcrash g
proc.go#L584: var crashingG atomic.Pointer[g]
proc.go#L632: allgs []*g
proc.go#L647: allgptr **g
proc.go#L650: func allgadd(gp *g) {
proc.go#L667: func allGsSnapshot() []*g {
proc.go#L679: func atomicAllG() (**g, uintptr) {
proc.go#L681: ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
proc.go#L686: func atomicAllGIndex(ptr **g, i uintptr) *g {
proc.go#L687: return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
proc.go#L693: func forEachG(fn func(gp *g)) {
proc.go#L705: func forEachGRace(fn func(gp *g)) {
proc.go#L882: func dumpgstatus(gp *g) {
proc.go#L1022: func ready(gp *g, traceskip int, next bool) {
proc.go#L1109: func readgstatus(gp *g) uint32 {
proc.go#L1117: func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
proc.go#L1145: func castogscanstatus(gp *g, oldval, newval uint32) bool {
proc.go#L1175: func casgstatus(gp *g, oldval, newval uint32) {
proc.go#L1279: func casGToWaiting(gp *g, old uint32, reason waitReason) {
proc.go#L1289: func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
proc.go#L1303: func casgcopystack(gp *g) uint32 {
proc.go#L1319: func casGToPreemptScan(gp *g, old, new uint32) {
proc.go#L1331: func casGFromPreempted(gp *g, old, new uint32) bool {
proc.go#L3156: func startlockedm(gp *g) {
proc.go#L3209: func execute(gp *g, inheritTime bool) {
proc.go#L3250: func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
proc.go#L3655: func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
proc.go#L3768: func checkIdleGCNoP() (*p, *g) {
proc.go#L3880: var tail *g
proc.go#L4063: func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
proc.go#L4069: func park_m(gp *g) {
proc.go#L4106: func goschedImpl(gp *g, preempted bool) {
proc.go#L4141: func gosched_m(gp *g) {
proc.go#L4146: func goschedguarded_m(gp *g) {
proc.go#L4153: func gopreempt_m(gp *g) {
proc.go#L4160: func preemptPark(gp *g) {
proc.go#L4235: func goyield_m(gp *g) {
proc.go#L4267: func goexit0(gp *g) {
proc.go#L4272: func gdestroy(gp *g) {
proc.go#L4782: func exitsyscall0(gp *g) {
proc.go#L4956: func malg(stacksize int32) *g {
proc.go#L4957: newg := new(g)
proc.go#L4993: func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
proc.go#L5102: func saveAncestors(callergp *g) *[]ancestorInfo {
proc.go#L5135: func gfput(pp *p, gp *g) {
proc.go#L5178: func gfget(pp *p) *g {
proc.go#L5408: func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
proc.go#L5961: forEachG(func(gp *g) {
proc.go#L6402: forEachG(func(gp *g) {
proc.go#L6449: func schedEnabled(gp *g) bool {
proc.go#L6493: func globrunqput(gp *g) {
proc.go#L6505: func globrunqputhead(gp *g) {
proc.go#L6528: func globrunqget(pp *p, max int32) *g {
proc.go#L6697: func runqput(pp *p, gp *g, next bool) {
proc.go#L6743: func runqputslow(pp *p, gp *g, h, t uint32) bool {
proc.go#L6744: var batch [len(pp.runq)/2 + 1]*g
proc.go#L6820: func runqget(pp *p) (gp *g, inheritTime bool) {
proc.go#L6941: func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
proc.go#L6973: func (q *gQueue) push(gp *g) {
proc.go#L6982: func (q *gQueue) pushBack(gp *g) {
proc.go#L7009: func (q *gQueue) pop() *g {
proc.go#L7039: func (l *gList) push(gp *g) {
proc.go#L7053: func (l *gList) pop() *g {
race0.go#L31: func raceacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
race0.go#L34: func racereleaseg(gp *g, addr unsafe.Pointer) { throw("race") }
race0.go#L36: func racereleaseacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
race0.go#L38: func racereleasemergeg(gp *g, addr unsafe.Pointer) { throw("race") }
runtime2.go#L266: func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
runtime2.go#L269: func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
runtime2.go#L277: func (gp *g) guintptr() guintptr {
runtime2.go#L286: func setGNoWB(gp **g, new *g) {
runtime2.go#L361: g *g
runtime2.go#L422: type g struct {
runtime2.go#L553: g0 *g // goroutine with scheduling stack
runtime2.go#L560: gsignal *g // signal-handling g
runtime2.go#L565: curg *g // current running goroutine
runtime2.go#L607: waitunlockf func(*g, unsafe.Pointer) bool
runtime2.go#L981: g *g
select.go#L62: func selparkcommit(gp *g, _ unsafe.Pointer) bool {
select.go#L237: gp *g
signal_amd64.go#L49: func (c *sigctxt) preparePanic(sig uint32, gp *g) {
signal_unix.go#L341: func doSigPreempt(gp *g, ctxt *sigctxt) {
signal_unix.go#L398: func sigFetchG(c *sigctxt) *g {
signal_unix.go#L411: gp := *(**g)(unsafe.Pointer(s.base()))
signal_unix.go#L605: var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
signal_unix.go#L606: var testSigusr1 func(gp *g) bool
signal_unix.go#L632: func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
signal_unix.go#L831: func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
stack.go#L745: func adjustctxt(gp *g, adjinfo *adjustinfo) {
stack.go#L771: func adjustdefers(gp *g, adjinfo *adjustinfo) {
stack.go#L783: func adjustpanics(gp *g, adjinfo *adjustinfo) {
stack.go#L789: func adjustsudogs(gp *g, adjinfo *adjustinfo) {
stack.go#L803: func findsghi(gp *g, stk stack) uintptr {
stack.go#L817: func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
stack.go#L868: func copystack(gp *g, newsize uintptr) {
stack.go#L1155: func isShrinkStackSafe(gp *g) bool {
stack.go#L1193: func shrinkstack(gp *g) {
stubs.go#L31: func getg() *g
stubs.go#L47: func mcall(fn func(*g))
stubs.go#L219: func setg(gg *g)
time.go#L307: func resetForSleep(gp *g, _ unsafe.Pointer) bool {
time.go#L369: goready(arg.(*g), 0)
trace.go#L92: reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil
trace.go#L351: gp *g
trace.go#L360: forEachGRace(func(gp *g) {
trace.go#L757: gopark(func(gp *g, _ unsafe.Pointer) bool {
trace.go#L911: func traceReader() *g {
trace.go#L922: func traceReaderAvailable() *g {
traceallocfree.go#L77: forEachGRace(func(gp *g) {
traceback.go#L122: func (u *unwinder) init(gp *g, flags unwindFlags) {
traceback.go#L132: func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) {
traceback.go#L774: func printcreatedby(gp *g) {
traceback.go#L802: func traceback(pc, sp, lr uintptr, gp *g) {
traceback.go#L814: func tracebacktrap(pc, sp, lr uintptr, gp *g) {
traceback.go#L823: func traceback1(pc, sp, lr uintptr, gp *g, flags unwindFlags) {
traceback.go#L1105: func gcallers(gp *g, skip int, pcbuf []uintptr) int {
traceback.go#L1113: func showframe(sf srcFunc, gp *g, firstFrame bool, calleeID abi.FuncID) bool {
traceback.go#L1197: func goroutineheader(gp *g) {
traceback.go#L1245: func tracebackothers(me *g) {
traceback.go#L1263: forEachGRace(func(gp *g) {
traceback.go#L1337: func isSystemGoroutine(gp *g, fixed bool) bool {
tracecpu.go#L211: func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
traceruntime.go#L438: func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
traceruntime.go#L492: func (tl traceLocker) GoUnpark(gp *g, skip int) {
traceruntime.go#L503: func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
traceruntime.go#L518: func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
traceruntime.go#L617: func (tl traceLocker) GoCreateSyscall(gp *g) {
tracestack.go#L38: func traceStack(skip int, gp *g, gen uintptr) uint64 {
The pages are generated with Golds v0.7.6. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news about Golds.